This notebook is part of an exploratory analysis of machine learning used to decompose hyperspectral datasets of hybrid perovskite nanoscale materials. Two machine learning models are mainly used in this project: Nonnegative Matrix Factorization (NMF) and Variational Autoencoders (VAEs).

This notebook generates artificial hyperspectral data to test NMF and VAE models and evaluate their accuracy.

Notebook Six: Using Artificial Data

Imports, Functions, and Classes¶

Imports¶

In [1]:
from preprocessing import *

#import PIL
from PIL import ImageFilter
from PIL import Image
from scipy.special import wofz

import nimfa
import sklearn
from sklearn.decomposition import NMF
from sklearn import metrics

import torch; torch.manual_seed(0)
from torchvision.utils import make_grid
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
import torch.distributions
import torchvision
import pyroved as pv
import pyro.distributions as dist
import pyro
from pyroved.models.base import baseVAE
from pyroved.nets import fcDecoderNet, fcEncoderNet, sDecoderNet
from pyroved.utils import (
    generate_grid, get_sampler, set_deterministic_mode, to_onehot, transform_coordinates)

save_directory = "./svg-figures/artificial-data/" # modify to save figures as a specified file extension
file_extension = ".svg"
SEM images: f1_img1, f2_img1
CL images: f1_img2, f2_img2
Denoised data: f1_sb_median, f2_sb_median
2D denoised data: f1_denoised_2d, f2_denoised_2d
Example points: f1_x_points, f1_y_points, f2_x_points, f2_y_points
Wavelengths and dimensions: f1_wav, f2_wav, f1_xpix, f1_ypix, f1_zpix, f2_xpix, f2_ypix, f2_zpix

Functions¶

Distributions¶

In [2]:
# 3 distribution functions: gaussian, lorentzian, and voigt

def gaussian(x, amp1, cen1, sigma1):
    return amp1 * (1 / (sigma1 * (np.sqrt(2 * np.pi)))) * (np.exp((-1.0 / 2.0) * (((x - cen1) / sigma1)**2)))

def lorentzian(x, amp1, cen1, wid1):
    return (amp1 * wid1**2 / ((x - cen1)**2 + wid1**2))

def voigt(x, x0, y0, a, sig, gam):
    return y0 + a * np.real(wofz((x - x0 + 1j * gam) / sig / np.sqrt(2))) / sig / np.sqrt(2 * np.pi)

NMF Functions¶

In [293]:
# SCIKIT-LEARN

def nmf_plot(suptitle, spect_matrices, img_matrices, xpix, ypix, save=False):

    rows = len(spect_matrices)
    columns = len(img_matrices)+3
    gs = GridSpec(rows, columns)
    gs.update(wspace=0.09,hspace=0)
    figaspect = plt.figaspect(float(ypix*3)/float(xpix*6))
    figsize = (5,8/(figaspect[0]/figaspect[1]))

    fig = plt.figure(figsize=figsize)
    #fig.patch.set_facecolor('#00000000')
    fig.suptitle(suptitle, fontsize=10)

    row_count = 0
    for i in range(rows):

        fig_plt = fig.add_subplot(gs[row_count,:2])
        fig_plt.set_xlim([0, 299])

        column_count = 0
        for j in range(row_count + 2):

            fig_img = fig.add_subplot(gs[row_count, column_count + 2])
            fig_img.imshow(img_matrices[row_count][column_count, :].reshape(ypix, xpix))
            fig_img.set_xticks([])
            fig_img.set_yticks([])
            fig_img.tick_params(color=color[j])
            for spine in fig_img.spines.values():
                spine.set_edgecolor(color[j])
                spine.set(lw=2)

            fig_plt.plot(spect_matrices[row_count][:, column_count], c=color[j], lw=1)
            fig_plt.set_xticks([])
            fig_plt.set_xticklabels([])
            fig_plt.tick_params(axis='both', direction='out', length=3, width=1, labelsize=3)
            if row_count == rows-1:
                fig_plt.set_xticks([75, 150, 225])
                fig_plt.set_xticklabels([75, 150, 225])

            column_count += 1

        row_count += 1
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)
    plt.show()

def get_variance(model, data):
    """ Estimate performance of the model on the data """
    prediction = model.inverse_transform(model.transform(data))
    return metrics.explained_variance_score(data, prediction, multioutput="variance_weighted")

def error_analysis(models, data):
    temp = []
    for model in models:
        temp.append(get_variance(model.fit(data), data))
    return temp

def explained_variance_plot(suptitle, rgb_results, hsi1_results, hsi_results, save=False):

    fig = plt.figure(figsize=(4, 3))
    #fig.patch.set_facecolor("#00000000")
    gs = GridSpec(1, 1)
    gs.update(wspace=0.1, hspace=0.1)
    plt.rcParams["font.weight"] = "bold"
    plt.rcParams["axes.labelweight"] = "bold"

    fig_plt = fig.add_subplot(gs[0, 0])
    fig_plt.plot([2, 3, 4, 5, 6], rgb_results[0], ms=10, marker='o', lw=2, c=color[0], label="Calibrator")
    fig_plt.plot([2, 3, 4, 5, 6], hsi1_results[0], ms=10, marker='o', lw=2, c=color[1], label="Noiseless Data")
    fig_plt.plot([2, 3, 4, 5, 6], hsi_results[0], ms=10, marker='o', lw=2, c=color[2], label="Noisy Data")

    fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
    fig_plt.set_title(suptitle)
    fig_plt.set_ylabel("EVR")
    fig_plt.set_xlabel("Number of Components")
    fig_plt.set_ylim(0.98, 1.001)
    fig_plt.set_xticks([2, 3, 4, 5, 6])
    fig_plt.set_xticklabels([2, 3, 4, 5, 6])
    fig_plt.tick_params(axis='both', direction='out', length=6, width=2, labelsize=6)
    fig_plt.grid()
    fig_plt.legend()

    if save:
        fig.savefig(save_directory + suptitle + file_extension, bbox_inches="tight")
    
    
# NIMFA

def nimfa_nmf_model(data, iterations, n_comp):
    # Returns list of 2 matrices, W and H respectively
    nmf = nimfa.Nmf(data, max_iter=iterations, rank=n_comp, 
                               update=nimfa_nmf_update, objective='fro')
    nmf_fit = nmf()
    W = nmf_fit.basis()
    H = nmf_fit.coef()
    return [W, H]


def snmf_model(data, sparseness_level, n_comp, iterations, version):
    # Returns list of 2 matrices, W and H respectively, and the snmf model
    snmf = nimfa.Snmf(data, rank=n_comp, max_iter=iterations, version=version, 
                      beta=sparseness_level)
    snmf_fit = snmf()
    W = snmf_fit.basis()
    H = snmf_fit.coef()
    return [W, H, snmf_fit.fit.sparseness()], snmf


def snmf_plot(suptitle, snmf_matrices, sparseness_levels, n_comp, xpix, ypix, save=False):

    rows = len(sparseness_levels)
    columns = n_comp + 2
    gs = GridSpec(rows, columns)
    gs.update(wspace=0.1,hspace=0.1)
    figsize = (columns, rows)
    
    fig = plt.figure(figsize=figsize)
    fig.patch.set_facecolor('white')
    fig.suptitle(suptitle, fontsize=10)

    row_count = 0
    for i in range(rows):
        
        fig_plt = fig.add_subplot(gs[row_count,columns-2:])
        fig_plt.set_xticks([])
        fig_plt.set_yticks([])
        fig_plt.tick_params(axis='both', direction='out', length=6, width=2)

        column_count = 0
        for j in range(columns-2):

            fig_img = fig.add_subplot(gs[row_count,column_count])
            fig_img.imshow(snmf_matrices[row_count][1][column_count,:].reshape(ypix, xpix))
            fig_img.set_xticks([])
            fig_img.set_yticks([])
            fig_img.tick_params(color=color[j])
            for spine in fig_img.spines.values():
                spine.set_edgecolor(color[j])
                spine.set(lw=2)

            fig_plt.plot(snmf_matrices[row_count][0][:,column_count],
                         c=color[j], lw=1)
            fig_plt.set_title(
                "beta = {:} \nW sparseness = {:.3f} \nH sparseness = {:.3f}".format(
                        sparseness_levels[row_count],
                        snmf_matrices[row_count][2][0],
                        snmf_matrices[row_count][2][1]), y=1.0, pad=-20, fontsize=3)

            column_count += 1

        row_count += 1
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)
        
def error_analysis_nimfa(models):
    temp = []
    for model in models:
        temp.append(model.evar())
    return temp

def sparseness_evr_plot(suptitle, rgb_results, hsi1_results, hsi_results, 
                        sparseness_levels, save=False):
    
    points = len(sparseness_levels)

    fig = plt.figure(figsize=(4, 3))
    fig.suptitle(suptitle, fontsize=10)
    #fig.patch.set_facecolor("#00000000")
    gs = GridSpec(1, 1)
    gs.update(wspace=0.1, hspace=0.1)
    #plt.rcParams["font.weight"] = "bold"
    #plt.rcParams["axes.labelweight"] = "bold"

    fig_plt = fig.add_subplot(gs[0, 0])
    fig_plt.plot(sparseness_levels, rgb_results[0], ms=10, marker='o', lw=2, 
                 c=color[0], label="Calibrator")
    fig_plt.plot(sparseness_levels, hsi1_results[0], ms=10, marker='o', lw=2, 
                 c=color[1], label="Noiseless Data")
    fig_plt.plot(sparseness_levels, hsi_results[0], ms=10, marker='o', lw=2, 
                 c=color[2], label="Noisy Data")

    fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
    #fig_plt.set_title("Explained Variance for Blind NMF")
    fig_plt.set_ylabel("EVR")
    fig_plt.set_xlabel("Sparseness Level")
    fig_plt.set_ylim(0.992, 1.001)
    fig_plt.set_xscale("log")
    fig_plt.set_xticks(sparseness_levels)
    fig_plt.set_xticklabels(sparseness_levels)
    fig_plt.tick_params(axis='both', direction='out', length=6, width=2, labelsize=6)
    fig_plt.grid()
    fig_plt.legend()
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension, bbox_inches="tight")

    plt.show()

VAE Functions¶

vae_plot builds a plot of the latent space dimensions with two or three latent dimensions and plots points corresponding to single pixels in the x and y axes of the original dataset. The point is to show a relationship between different spectral features detected by the autoencoder. Below the plot is an image component of the latent representation of the data.

spectra_plot plots each spectrum in the image color mapped according to its value along a specified axis.

ssvae_plot plots the latent space and image reconstruction of SSVAE models

pointfinder_plot shows every spectrum with a certain x value (along a vertical line in the image).

spectsig_plot shows all labeled spectra color mapped according to each class

ssvae_label_plot shows the first 180 spectra the SSVAE model assigns to a specified class

spect_label_plot plot one spectrum from each class in the labeled data

custom_spect_label_plot plot one spectrum from each class in the latent representation of the data

In [250]:
marker_ec = "black"
marker_lw = 0.08
marker_alpha = 0.5
marker_s = 10

# Plotting pixels containing entire spectra in a latent space and corresponding images
def vae_plot(suptitle, z_mean, xpix, ypix, save=False):
    
    rows = 2
    columns = len(z_mean[0])
    
    fig = plt.figure(figsize=(4,4))
    fig.suptitle(suptitle, fontsize=10)
    fig.patch.set_facecolor("white")
    
    for i in range(rows):
        for j in range(columns):
            
            if i == 0: # First row
                
                if columns == 2: # 2D Latent space
                    ax = fig.add_subplot(2, 2, j + 1)
                    ax.scatter(z_mean[:, 0], z_mean[:, 1], c=z_mean[:, j], cmap=cmap,
                            ec=marker_ec, lw=marker_lw, alpha=marker_alpha, s=marker_s)
                    ax.tick_params(axis='both', which='major', labelsize=4, direction='out', 
                               length=2, width=0.5)
                    
                else: # 3D Latent space
                    ax = fig.add_subplot(2, 3, j + 1, projection='3d')
                    ax.scatter(z_mean[:, 0], z_mean[:, 1], z_mean[:, 2], 
                            c=z_mean[:, j], cmap=cmap,
                            ec=marker_ec, lw=marker_lw, alpha=marker_alpha, s=marker_s)
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
                    ax.zaxis.set_ticklabels([])
                    ax.set_xlabel("X", size=5, labelpad=-15)
                    ax.set_ylabel("Y", size=5, labelpad=-15)
                    ax.set_zlabel("Z", size=5, labelpad=-15)
                    if j == 0:
                        ax.view_init(30, -85) 
                    elif j == 1:
                        ax.view_init(30, -40)
                    elif j == 2:
                        ax.view_init(30, 5)
                
                if j == 0:
                    ax.set_title('Latent Space', size=7)
            
            if i == 1: # Second row
                
                if columns == 2: # 2D Latent Space
                    img = fig.add_subplot(2, 2, j+3)
                else: # 3D Latent Space
                    img = fig.add_subplot(2, 3, j+4)
                    
                img.imshow(z_mean[:,j].reshape(ypix, xpix), cmap=cmap)
                img.set_xticks([])
                img.set_yticks([])
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)
        
# Spectra Plot (for determining latent dimensions)
def spectra_plot(suptitle, img, array, xpix, ypix, save=False):
    
    n = xpix*ypix # total number of spectra
    cmap = mpl.colormaps['viridis'](np.linspace(0,1,n)) # create colormap
    fig = plt.figure(figsize=(6,3))
    fig.suptitle(suptitle, fontsize=10)
    fig.patch.set_facecolor('white')
    ax = fig.add_subplot()
    ax.set_prop_cycle('color', list(cmap)) # colormaps each spectra from first to last in array
    ax.tick_params(axis='both', which='major', labelsize=5, direction='out', length=2, width=0.5)
    #plt.xlim(short_wav - 5, long_wav + 5)
    #ax.set_xticks([500, 750])
    #ax.set_xticklabels([])
    #plt.yscale('log')
    
    for i in array:
        ax.plot(img[:,i[0],i[1]], lw=0.03, alpha=0.5)
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)

        
# SEMI-SUPERVISED VAE

# Plot the latent space and image reconstruction of SSVAE models
def ssvae_plot(suptitle, z_mean, z_labels, xpix, ypix, save=False):
    
    rows = 2
    columns = 1
    
    cmap = mpl.colormaps['viridis']
    fig = plt.figure(figsize=(4,4))
    fig.suptitle(suptitle, fontsize=10)
    gs = GridSpec(rows, columns)
    #fig.patch.set_facecolor('#00000000')
    fig.patch.set_facecolor('white')
    
    for i in range(rows):
        column_count = 0
        for j in range(columns):
            if i == 0:
                ax = fig.add_subplot(gs[0,column_count])
                ax.scatter(z_mean[:,0], z_mean[:,1], c=z_labels, cmap=cmap,
                        ec=marker_ec, lw=marker_lw, alpha=marker_alpha, s=marker_s)
                #ax.tick_params(axis='both', labelsize=10)
                ax.set_xticks([])
                ax.set_yticks([])
                
            if i == 1:
                img = fig.add_subplot(gs[1,column_count])
                img.imshow(z_labels.reshape(ypix, xpix), cmap=cmap)
                img.set_xticks([])
                img.set_yticks([])

            column_count += 1
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)

# Function to plot every spectrum with a certain x value (along a vertical line in the image)
def pointfinder_plot(img, x_pt, ypix):
    
    rows = ypix
    columns = 1
    fig = plt.figure(figsize=(4, rows))
    
    for i in range(rows):

        ax = fig.add_subplot(rows, columns, i + 1)
        ax.plot(img[:, i, x_pt], c=color[i], lw=2)
        ax.text(0, 0, str([i, x_pt]), fontsize=4)
        ax.set_xticks([])
        ax.set_yticks([])   

    plt.show()

# Plot all spectra in each of the 4 classes
def spectsig_plot(suptitle, img, pts, save=False):
    
    cmap_list = mpl.colormaps['viridis'](np.linspace(0,1,len(pts))) # create colormap
    rows = len(pts[0])
    columns = 4
    gs = GridSpec(rows, columns)
    gs.update(wspace=0, hspace=0)

    figsize = (columns*2, rows)
    fig = plt.figure(figsize=figsize)
    fig.patch.set_facecolor("white")
    fig.suptitle(suptitle, fontsize=20)
    #fig.subplots_adjust(top=0.95, bottom=0.1)

    row_count = 0
    for i in range(rows):

        column_count = 0
        for j in range(columns):

            fig_plt = fig.add_subplot(gs[i,j])
            fig_plt.plot(img[pts[j][i][0],pts[j]
                                  [i][1],:], c=cmap_list[j], lw=2)
            #plt.xlim(short_wav - 5, long_wav + 5) # Cropping the spectra shown in the plot
            #fig_plt.tick_params(axis='both', direction='out', length=12, width=4)
            fig_plt.set_xticks([])
            fig_plt.set_yticks([])

            column_count += 1

        row_count += 1
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)
    plt.show()
    
# This plot shows the first 180 spectra the SSVAE model assigns to a specified class
def ssvae_label_plot(img, pts, starting_index=0, save=False):
    
    rows = 180
    #rows = len(pts)
    columns = 1
    fig = plt.figure(figsize=(4, rows))
    fig.patch.set_facecolor("white")
    #fig.suptitle(suptitle, fontsize=30)
    gs = GridSpec(rows, columns)
    
    row_count = 0
    for i in range(starting_index, starting_index + rows):

        fig_plt = fig.add_subplot(gs[row_count,0])
        fig_plt.plot(img[:, pts[i,0], pts[i,1]], 
                     c=color[row_count], lw=3)
        plt.xticks([])
        plt.yticks([])
        #plt.text(220,350,str((int(pts[i][0]),int(pts[i][1]))))
        plt.text(0, 0, str(i))
        row_count += 1

    #gs.tight_layout(fig, rect=[0,0.03,1,0.95])
    if save:
        fig.savefig(save_directory + suptitle + file_extension)
    plt.show()

# Plot one spectrum from each class in the labeled data
def spect_label_plot(suptitle, img, pts, save=False):
    
    rows = 1
    columns = 4
    gs = GridSpec(rows, columns)
    gs.update(wspace=0,hspace=0.2)

    figsize = (columns*4, rows*3)
    fig = plt.figure(figsize=figsize)
    fig.suptitle(suptitle, size = 20)
    fig.patch.set_facecolor("white")
    #fig.patch.set_facecolor('#00000000')
    #fig.suptitle(suptitle, fontsize=30)
    #fig.subplots_adjust(top=0.95, bottom=0.1)

    row_count = 0
    for i in range(rows):

        column_count = 0
        for j in range(columns):

            fig_plt = fig.add_subplot(gs[i,j])
            fig_plt.plot(img[:,pts[j][i][0],pts[j][i][1]], c=cmap_list[j], lw=3)
            #plt.text(300, 370, str(column_count))
            #fig_plt.set_title(method_names[row_count] + point_names[column_count])
            #fig_plt.tick_params(color=color[j])
            #plt.xlim(short_wav - 5, long_wav + 5) # Cropping the spectra shown in the plot
            #fig_plt.tick_params(axis='both', direction='out', length=12, width=4)
            #fig_plt.set_xticks([400, 550, 750])
            fig_plt.set_yticks([])
            fig_plt.set_xticks([])

            column_count += 1

        row_count += 1
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)
    plt.show()

# Plot one spectrum from each class in the latent representation of the data
def custom_spect_label_plot(suptitle, img, pts, save=False):
    
    rows = 1
    columns = 4
    gs = GridSpec(rows, columns)
    gs.update(wspace=0,hspace=0.2)

    figsize = (columns*4, rows*3)
    fig = plt.figure(figsize=figsize)
    #fig.patch.set_facecolor("#00000000")
    fig.patch.set_facecolor("white")
    fig.suptitle(suptitle, fontsize=20)
    #fig.subplots_adjust(top=0.95, bottom=0.1)
    cmap_list = mpl.colormaps['viridis'](np.linspace(0,1,len(pts))) # create colormap

    row_count = 0
    for i in range(rows):

        column_count = 0
        for j in range(columns):

            fig_plt = fig.add_subplot(gs[i, j])
            fig_plt.plot(img[:, pts[j][0], pts[j][1]], c=cmap_list[j], lw=3)
            #plt.text(300, 370, str(column_count))
            #fig_plt.set_title(method_names[row_count] + point_names[column_count])
            #fig_plt.tick_params(color=color[j])
            #plt.xlim(short_wav, long_wav) # Cropping the spectra shown in the plot
            fig_plt.tick_params(axis='both', direction='out', length=6, width=2)
            #fig_plt.set_xticks([400, 750])
            fig_plt.set_xticks([])
            fig_plt.set_yticks([])
            #fig_plt.set_xticklabels([])
            
            #if column_count == 0:
                #fig_plt.set_yticks([1000])
                #fig_plt.set_yticklabels([])
                #fig_plt.tick_params(axis='y', direction='out', length=12, width=4)
            
            #if row_count == rows-1:
                #fig_plt.set_xticks([400-short_wav, 750-long_wav])
                #fig_plt.set_xticklabels([])
                #fig_plt.tick_params(axis='x', direction='out', length=12, width=4)

            column_count += 1

        row_count += 1
    
    if save:
        fig.savefig(save_directory + suptitle + file_extension)
    plt.show()

Hyperspectral Image Class¶

In [10]:
class HSI:

    # Initialize the HSI
    def __init__(self, img, dist1, dist2, dist3, zpix, ypix, xpix, 
                 filter_type=None, filter_strength=0, noise=0):
    
        self.filter_type = filter_type
        self.filter_strength = filter_strength
        
        # Add image filter
        if self.filter_type == "Gaussian" or "gaussian":
            self.im = img.filter(ImageFilter.GaussianBlur(radius = self.filter_strength))

        if self.filter_type == "Box" or "box":
            self.im = img.filter(ImageFilter.BoxBlur(radius = self.filter_strength))

        if self.filter_type == None or "None" or "none":
            self.im = img
        
        # Initialize noise type
        if noise == 0:
            self.noise = 0
            
        elif noise == 1:
            self.noise = np.random.normal(0, 0.25, zpix)

        elif noise == 2:
            self.noise = np.random.normal(0, 0.5, zpix)

        elif noise ==3 :
            self.noise = np.random.normal(0, 1.0, zpix)
        
        # Add noise to distributions
        self.dist1 = dist1 + self.noise
        self.dist2 = dist2 + self.noise
        self.dist3 = dist3 + self.noise
        
        # Create the HSI
        r, g, b = self.im.split()
        zero = r.point(lambda _ : 0) # create xpix by ypix array of zeros

        red_merge = Image.merge("RGB", (r, zero, zero))
        green_merge = Image.merge("RGB", (zero, g, zero))
        blue_merge = Image.merge("RGB", (zero, zero, b))
        red_array = np.asarray(red_merge)[:, :, 0]
        green_array = np.asarray(green_merge)[:, :, 1]
        blue_array = np.asarray(blue_merge)[:, :, 2]

        red = normalize(red_array.flatten()).tolist()
        blue = normalize(blue_array.flatten()).tolist()
        green = normalize(green_array.flatten()).tolist()

        for i in range(len(red)):
            red[i] = red[i] * self.dist1

        for i in range(len(green)):
            green[i] = green[i] *self.dist2

        for i in range(len(blue)):
            blue[i] = blue[i] * self.dist3

        hyper = []
        for i in range(len(blue)):
            idx = red[i] + green[i] + blue[i]
            hyper.append(idx)

        hyper = np.asarray(hyper).reshape(xpix, ypix, zpix)
        hyper = np.swapaxes(hyper, 0, 2)
        hyper = np.rot90(hyper, k=3, axes=(1, 2))
        hyper = np.flip(hyper, axis=2)
        self.hyper = hyper
                
        
    # Original image  
    def img(self):
        plt.imshow(self.im)


    # Image at certain band
    def hyper_img(self, band):
        return(plt.imshow(self.hyper[:, :, band], cmap = "viridis"))


    # Spectra at certain pixel
    def hyper_band(self, x, y, color):
        
        self.color = str(color)
        
        fig, ax = plt.subplots(figsize=(2, 2))
        ax.plot(self.hyper[:, y, x], color=self.color)
        
        return fig

Generating Artificial Data¶

Reading Blazers Image¶

In [5]:
# Define image dimensions
zpix = 300
ypix = 150
xpix = 150

rgba = Image.open(r"png-figures/uab-blazers.png")
rgba = rgba.resize((xpix, ypix), Image.Resampling.LANCZOS)
#rgba = rgba.transpose(method=Image.FLIP_LEFT_RIGHT)
rgb = rgba.convert('RGB')
gray = rgb.convert('L')

# Display image
fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow(rgb)
plt.tight_layout()
No description has been provided for this image

Create HSI¶

In [81]:
# distributions
x = np.linspace(0, zpix, zpix)
red_gaussian = gaussian(x, 1000, 75, 20) 
green_lorentzian = lorentzian(x, 70, 150, 10)
blue_voigt = voigt(x, 225, 0, 500, 7, 1) 

# HSI without noise
hsi1 = HSI(rgb, red_gaussian, green_lorentzian, blue_voigt, zpix, ypix, xpix)
hsi1 = hsi1.hyper

# HSI with Gaussian noise
hsi = HSI(rgb, red_gaussian, green_lorentzian, blue_voigt, zpix, ypix, xpix, 
           filter_type=None, filter_strength=0, noise=1)
hsi = hsi.hyper

#hsi.hyper_band(100, 200, color[0])
#plt.show()

fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow(hsi[zpix//2, :, :])
plt.tight_layout()
No description has been provided for this image

Plot HSI¶

Plot Code¶

In [8]:
def artificial_data_plot(suptitle, hsi, pts, save=False):
    
    # Create figure
    fig, axes = plt.subplots(figsize=(10, 12))
    fig.suptitle(suptitle, fontsize=20)
    rows = 3 
    columns = 2
    gs = GridSpec(rows, columns)
    gs.update(wspace=0.1,hspace=0.1)
    
    title_fontsize = 7
    axes.spines["top"].set_visible(False)
    axes.spines["right"].set_visible(False)
    axes.spines["bottom"].set_visible(False)
    axes.spines["left"].set_visible(False)
    axes.set_xticks([])
    axes.set_yticks([])
    
    count = 0
    for i in range(rows):

        # img
        ax = fig.add_subplot(gs[i, 0])
        ax.imshow(hsi[pts[i][0][0], :, ], cmap=cmap)
        ax.set_title("Wavelength: " + str(pts[i][0][0]), fontsize=title_fontsize)
        ax.plot(pts[i][0][2], pts[i][0][1], "X", markersize=5, c=color[count])
        ax.plot(pts[i][1][2], pts[i][1][1], "X", markersize=5, c=color[count + 1])
        ax.set_xticks([])
        ax.set_yticks([])

        # plt
        ax = fig.add_subplot(gs[i, 1])
        ax.plot(hsi[:, pts[i][0][1], pts[i][0][2]], lw=2, c=color[count])
        ax.plot(hsi[:, pts[i][1][1], pts[i][1][2]], lw=2, c=color[count + 1])
        ax.axvline(x=pts[i][0][0], ls="--", c="black", lw=2)
        
        count += 2
        
    if save:
        fig.savefig(save_directory + suptitle + file_extension)

Plots¶

In [30]:
# list of 3 sets of red and blue points ordered [z, y, x]
pts = [[[50, 75, 75], [50, 72, 110]], [[150, 22, 140], [150, 22, 60]], 
       [[250, 70, 20], [250, 70, 30]]]
In [297]:
artificial_data_plot("Artificial HSI with no Noise", hsi1, pts)
No description has been provided for this image
In [298]:
artificial_data_plot("Artificial HSI with Noise", hsi, pts)
No description has been provided for this image

Test¶

In [219]:
test = rgba.resize((150, 150), Image.Resampling.LANCZOS)
test = test.convert('RGB')

fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow(test)
plt.tight_layout()
No description has been provided for this image

Extra¶

In [110]:
# Just in case
def artificial_data_plot(suptitle, hsi, 
                         xr1=150, yr1=150, xb1=225, yb1=150, z1=230, 
                         xr2=150, yr2=237, xb2=122, yb2=45, z2=420,
                         xr3=50, yr3=150, xb3=235, yb3=150, z3=800,
                         save=False):
    
    # Create figure
    fig, ax = plt.subplots(figsize=(10, 8))
    fig.suptitle(suptitle, fontsize=20)
    rows = 3 
    columns = 2
    title_fontsize = 7

    # img1
    img1 = fig.add_subplot(rows, columns, 1)
    img1.imshow(hsi.hyper[z1, :, ], cmap=cmap)
    img1.set_title("Wavelength: " + str(z1), fontsize=title_fontsize)
    img1.plot(xr1, yr1, "X", markersize=5, c=color[2]) # red
    img1.plot(xb1, yb1, "X", markersize=5, c=color[0]) # blue

    # plt1
    plt1 = fig.add_subplot(rows, columns, 2)
    plt1.plot(hsi.hyper[:, yr1, xr1], c=color[2]) # red
    plt1.plot(hsi.hyper[:, yb1, xb1], c=color[0]) # blue
    plt1.axvline(x=z1, ls="--", c=color[4], lw=2)
    
    # img2
    img2 = fig.add_subplot(rows, columns, 3)
    img2.imshow(hsi.hyper[z2, :, ], cmap=cmap)
    img2.set_title("Wavelength: " + str(z2), fontsize=title_fontsize)
    img2.plot(xr2, yr2, "X", markersize=5, c=color[2]) # red
    img2.plot(xb2, yb2, "X", markersize=5, c=color[0]) # blue

    # plt2
    plt2 = fig.add_subplot(rows, columns, 4)
    plt2.plot(hsi.hyper[:, yr2, xr2], c=color[2]) # red
    plt2.plot(hsi.hyper[:, yb2, xb2], c=color[0]) # blue
    plt2.axvline(x=z2, ls="--", c=color[4], lw=2)
    
    # img3
    img3 = fig.add_subplot(rows, columns, 5)
    img3.imshow(hsi.hyper[z3, :, ], cmap=cmap)
    img3.set_title("Wavelength: " + str(z3), fontsize=title_fontsize)
    img3.plot(xr3, yr3, "X", markersize=5, c=color[2]) # red
    img3.plot(xb3, yb3, "X", markersize=5, c=color[0]) # blue

    # plt3
    plt3 = fig.add_subplot(rows, columns, 6)
    plt3.plot(hsi.hyper[:, yr3, xr3], c=color[2]) # red
    plt3.plot(hsi.hyper[:, yb3, xb3], c=color[0]) # blue
    plt3.axvline(x=z3, ls="--", c=color[4], lw=2)

    if save:
        fig.savefig(save_directory + suptitle + file_extension)

Calibrator¶

In [84]:
# Read image
rgb_calibrator = Image.open(r"png-figures/rgb-calibrator.png")
rgb_calibrator = rgb_calibrator.resize((xpix, ypix),Image.Resampling.LANCZOS)
gray_calibrator = rgb_calibrator.convert('L')

# Display image
fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow(rgb_calibrator)
plt.tight_layout()
No description has been provided for this image
In [85]:
# Create calibrator HSI
hsi_rgb = HSI(rgb_calibrator, red_gaussian, green_lorentzian, blue_voigt, zpix, ypix, xpix, noise=1)
hsi_rgb = hsi_rgb.hyper

fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow(hsi_rgb[150, :, :])
plt.tight_layout()

calibrator_pts = [[[50, 75, 25], [50, 75, 75]], [[150, 75, 75], [150, 75, 125]], 
       [[250, 75, 25], [250, 75, 125]]]
No description has been provided for this image
In [323]:
artificial_data_plot("Calibrator HSI", hsi_rgb, calibrator_pts)
No description has been provided for this image

Preprocessing¶

Calibrator¶

In [93]:
hsi_rgb_1d = np.reshape(hsi_rgb, (zpix * ypix * xpix)) # reshape to 1D

# subtract background
# take mean of region with values close to or equal to zero
hsi_rgb_sb_mean = np.mean(hsi_rgb[0:49, 75, 125]) 
hsi_rgb_1d = [(i - hsi_rgb_sb_mean) for i in hsi_rgb_1d]
hsi_rgb_1d = [i.clip(min=0) for i in hsi_rgb_1d]

# add median filter
hsi_rgb_1d = median_filter(hsi_rgb_1d, size=3, footprint=None, output=None, mode="reflect", cval=0.0, origin=0)

hsi_rgb_denoised = np.reshape(hsi_rgb_1d, (zpix, ypix, xpix))
hsi_rgb_denoised_2d = np.reshape(hsi_rgb_1d, (zpix, ypix * xpix))

print(hsi_rgb_denoised[0:99, 75, 125])
[0.         0.         0.         0.27552875 0.         0.04978867
 0.         0.16889373 0.         0.         0.38135244 0.20566114
 0.10594008 0.         0.         0.09642526 0.         0.
 0.10770786 0.19562126 0.         0.36177429 0.         0.
 0.0503009  0.08127209 0.02828932 0.         0.         0.42239525
 0.22861897 0.35820817 0.04569547 0.         0.         0.16142889
 0.24382526 0.         0.28222269 0.0043404  0.07616022 0.15895464
 0.01760716 0.13643076 0.         0.         0.         0.16622664
 0.07762084 0.83714537 0.         0.         0.         0.04354635
 0.43763247 0.         0.07563135 0.         0.09497708 0.
 0.         0.32586182 0.33358066 0.         0.         0.75859235
 0.17609968 0.06178614 0.         0.35788081 0.19915186 0.45181453
 0.15375299 0.         0.         0.00478558 0.         0.1310737
 0.01164888 0.         0.07062049 0.21185933 0.2387573  0.
 0.         0.         0.         0.41862053 0.13288304 0.22579255
 0.33791578 0.         0.         0.         0.         0.
 0.04344398 0.08056229 0.29355606]
In [322]:
artificial_data_plot("Denoised Artificial HSI Calibrator", hsi_rgb_denoised, calibrator_pts)
No description has been provided for this image

Noisy data¶

In [95]:
hsi_1d = np.reshape(hsi, (zpix * ypix * xpix)) # reshape to 1D

# subtract background
hsi_sb_mean = np.mean(hsi[0:49, 70, 30]) 
hsi_1d = [(i - hsi_sb_mean) for i in hsi_1d]
hsi_1d = [i.clip(min=0) for i in hsi_1d]

# add median filter
hsi_1d = median_filter(hsi_1d, size=3, footprint=None, output=None, mode="reflect", cval=0.0, origin=0)

hsi_denoised = np.reshape(hsi_1d, (zpix, ypix, xpix))
hsi_denoised_2d = np.reshape(hsi_1d, (zpix, ypix * xpix))

print(hsi_denoised[0:49, 70, 30])
[0.         0.         0.08978075 0.00598767 0.09004533 0.
 0.         0.         0.         0.16527938 0.         0.26038743
 0.03756016 0.         0.11066113 0.         0.         0.
 0.12138748 0.12681052 0.         0.03256904 0.12539103 0.
 0.         0.         0.         0.06234503 0.28816074 0.29807671
 0.13887105 0.         0.19668559 0.016202   0.29717581 0.
 0.         0.         0.06047049 0.07211239 0.0576191  0.10714568
 0.         0.19122916 0.1980068  0.04121401 0.         0.
 0.0304507 ]
In [321]:
artificial_data_plot("Denoised Artificial HSI", hsi_denoised, pts)
No description has been provided for this image

NMF on Artificial Data¶

Blind NMF with Nimfa¶

Calibrator¶

In [253]:
iterations = 400
update = "divergence"
objective = "fro" # frobenius norm objective function
test_conv = 10 # how often convergence test is done
min_residuals = None # minimum required improvement of residuals from previous iteration

nmf0_2comp = nimfa.Nmf(hsi_rgb_denoised_2d, max_iter=iterations, rank=2, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf0_2comp_fit = nmf0_2comp()
nmf0_2comp_W = nmf0_2comp_fit.basis()
nmf0_2comp_H = nmf0_2comp_fit.coef()

nmf0_3comp = nimfa.Nmf(hsi_rgb_denoised_2d, max_iter=iterations, rank=3, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf0_3comp_fit = nmf0_3comp()
nmf0_3comp_W = nmf0_3comp_fit.basis()
nmf0_3comp_H = nmf0_3comp_fit.coef()

nmf0_4comp = nimfa.Nmf(hsi_rgb_denoised_2d, max_iter=iterations, rank=4, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf0_4comp_fit = nmf0_4comp()
nmf0_4comp_W = nmf0_4comp_fit.basis()
nmf0_4comp_H = nmf0_4comp_fit.coef()

nmf0_5comp = nimfa.Nmf(hsi_rgb_denoised_2d, max_iter=iterations, rank=5, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf0_5comp_fit = nmf0_5comp()
nmf0_5comp_W = nmf0_5comp_fit.basis()
nmf0_5comp_H = nmf0_5comp_fit.coef()

nmf0_6comp = nimfa.Nmf(hsi_rgb_denoised_2d, max_iter=iterations, rank=6, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf0_6comp_fit = nmf0_6comp()
nmf0_6comp_W = nmf0_6comp_fit.basis()
nmf0_6comp_H = nmf0_6comp_fit.coef()

# Arrays of matrices
nmf0_spect_matrices = [nmf0_2comp_W, nmf0_3comp_W, nmf0_4comp_W, nmf0_5comp_W, nmf0_6comp_W]
nmf0_img_matrices = [nmf0_2comp_H, nmf0_3comp_H, nmf0_4comp_H, nmf0_5comp_H, nmf0_6comp_H]
In [303]:
nmf_plot("Blind NMF on Calibrator 2-6 Components Nimfa", nmf0_spect_matrices, 
         nmf0_img_matrices, xpix, ypix)
No description has been provided for this image

Noiseless artificial data¶

In [255]:
hsi1_2d = np.reshape(hsi1, (zpix, ypix * xpix))

iterations = 400
update = "divergence"
objective = "fro" # frobenius norm objective function
test_conv = 10 # how often convergence test is done
min_residuals = None # minimum required improvement of residuals from previous iteration

nmf1_2comp = nimfa.Nmf(hsi1_2d, max_iter=iterations, rank=2, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf1_2comp_fit = nmf1_2comp()
nmf1_2comp_W = nmf1_2comp_fit.basis()
nmf1_2comp_H = nmf1_2comp_fit.coef()

nmf1_3comp = nimfa.Nmf(hsi1_2d, max_iter=iterations, rank=3, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf1_3comp_fit = nmf1_3comp()
nmf1_3comp_W = nmf1_3comp_fit.basis()
nmf1_3comp_H = nmf1_3comp_fit.coef()

nmf1_4comp = nimfa.Nmf(hsi1_2d, max_iter=iterations, rank=4, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf1_4comp_fit = nmf1_4comp()
nmf1_4comp_W = nmf1_4comp_fit.basis()
nmf1_4comp_H = nmf1_4comp_fit.coef()

nmf1_5comp = nimfa.Nmf(hsi1_2d, max_iter=iterations, rank=5, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf1_5comp_fit = nmf1_5comp()
nmf1_5comp_W = nmf1_5comp_fit.basis()
nmf1_5comp_H = nmf1_5comp_fit.coef()

nmf1_6comp = nimfa.Nmf(hsi1_2d, max_iter=iterations, rank=6, update=update, objective=objective, 
                       test_conv=test_conv, min_residuals=min_residuals)
nmf1_6comp_fit = nmf1_6comp()
nmf1_6comp_W = nmf1_6comp_fit.basis()
nmf1_6comp_H = nmf1_6comp_fit.coef()

# Arrays of matrices
nmf1_spect_matrices = [nmf1_2comp_W, nmf1_3comp_W, nmf1_4comp_W, nmf1_5comp_W, nmf1_6comp_W]
nmf1_img_matrices = [nmf1_2comp_H, nmf1_3comp_H, nmf1_4comp_H, nmf1_5comp_H, nmf1_6comp_H]

#nmf1_spect_matrices = [nmf1_2comp_W]
#nmf1_img_matrices = [nmf1_2comp_H]
In [304]:
nmf_plot("Blind NMF on Noiseless Artificial Data 2-6 Components Nimfa", nmf1_spect_matrices, 
         nmf1_img_matrices, xpix, ypix)
No description has been provided for this image

Noisy artificial data¶

In [257]:
iterations = 400
update = "divergence"
objective = "fro" # frobenius norm objective function
test_conv = 10
min_residuals = None

nmf2_2comp = nimfa.Nmf(hsi_denoised_2d, max_iter=iterations, rank=2, update=update, 
                      objective=objective, test_conv=test_conv, min_residuals=min_residuals)
nmf2_2comp_fit = nmf2_2comp()
nmf2_2comp_W = nmf2_2comp_fit.basis()
nmf2_2comp_H = nmf2_2comp_fit.coef()

nmf2_3comp = nimfa.Nmf(hsi_denoised_2d, max_iter=iterations, rank=3, update=update, 
                      objective=objective, test_conv=test_conv, min_residuals=min_residuals)
nmf2_3comp_fit = nmf2_3comp()
nmf2_3comp_W = nmf2_3comp_fit.basis()
nmf2_3comp_H = nmf2_3comp_fit.coef()

nmf2_4comp = nimfa.Nmf(hsi_denoised_2d, max_iter=iterations, rank=4, update=update, 
                      objective=objective, test_conv=test_conv, min_residuals=min_residuals)
nmf2_4comp_fit = nmf2_4comp()
nmf2_4comp_W = nmf2_4comp_fit.basis()
nmf2_4comp_H = nmf2_4comp_fit.coef()

nmf2_5comp = nimfa.Nmf(hsi_denoised_2d, max_iter=iterations, rank=5, update=update, 
                      objective=objective, test_conv=test_conv, min_residuals=min_residuals)
nmf2_5comp_fit = nmf2_5comp()
nmf2_5comp_W = nmf2_5comp_fit.basis()
nmf2_5comp_H = nmf2_5comp_fit.coef()

nmf2_6comp = nimfa.Nmf(hsi_denoised_2d, max_iter=iterations, rank=6, update=update, 
                      objective=objective, test_conv=test_conv, min_residuals=min_residuals)
nmf2_6comp_fit = nmf2_6comp()
nmf2_6comp_W = nmf2_6comp_fit.basis()
nmf2_6comp_H = nmf2_6comp_fit.coef()

# Arrays of matrices
nmf2_spect_matrices = [nmf2_2comp_W, nmf2_3comp_W, nmf2_4comp_W, nmf2_5comp_W, nmf2_6comp_W]
nmf2_img_matrices = [nmf2_2comp_H, nmf2_3comp_H, nmf2_4comp_H, nmf2_5comp_H, nmf2_6comp_H]
In [305]:
nmf_plot("Blind NMF on Noisy Artificial Data 2-6 Components Nimfa", 
         nmf2_spect_matrices, nmf2_img_matrices, xpix, ypix)
No description has been provided for this image

Blind NMF with Scikit-learn¶

In [259]:
init = "random"
solver = "cd"
beta_loss = "frobenius"
tolerance = 0
iterations = 400
random_state = None
alpha_W = 0.0
alpha_H = "same"
l1_ratio = 0.0
verbose = 0
shuffle = False


# 2 Components
nmf_2comp = NMF(n_components=2, init=init, solver=solver, beta_loss=beta_loss, 
                tol=tolerance, max_iter=iterations, random_state=random_state, alpha_W=alpha_W, 
                   alpha_H=alpha_H, l1_ratio=l1_ratio, verbose=verbose, shuffle=shuffle)
# 3 Components
nmf_3comp = NMF(n_components=3, init=init, solver=solver, beta_loss=beta_loss, 
                tol=tolerance, max_iter=iterations, random_state=random_state, alpha_W=alpha_W, 
                   alpha_H=alpha_H, l1_ratio=l1_ratio, verbose=verbose, shuffle=shuffle)
# 4 Components
nmf_4comp = NMF(n_components=4, init=init, solver=solver, beta_loss=beta_loss, 
                tol=tolerance, max_iter=iterations, random_state=random_state, alpha_W=alpha_W, 
                   alpha_H=alpha_H, l1_ratio=l1_ratio, verbose=verbose, shuffle=shuffle)
# 5 Components
nmf_5comp = NMF(n_components=5, init=init, solver=solver, beta_loss=beta_loss, 
                tol=tolerance, max_iter=iterations, random_state=random_state, alpha_W=alpha_W, 
                   alpha_H=alpha_H, l1_ratio=l1_ratio, verbose=verbose, shuffle=shuffle)
# 6 Components
nmf_6comp = NMF(n_components=6, init=init, solver=solver, beta_loss=beta_loss, 
                tol=tolerance, max_iter=iterations, random_state=random_state, alpha_W=alpha_W, 
                   alpha_H=alpha_H, l1_ratio=l1_ratio, verbose=verbose, shuffle=shuffle)

Calibrator¶

In [260]:
# 2 Components
rgb_nmf_2comp = nmf_2comp
W_rgb_nmf_2comp = rgb_nmf_2comp.fit_transform(hsi_rgb_denoised_2d)
H_rgb_nmf_2comp = rgb_nmf_2comp.components_
# 3 Components
rgb_nmf_3comp = nmf_3comp
W_rgb_nmf_3comp = rgb_nmf_3comp.fit_transform(hsi_rgb_denoised_2d)
H_rgb_nmf_3comp = rgb_nmf_3comp.components_
# 4 Components
rgb_nmf_4comp = nmf_4comp
W_rgb_nmf_4comp = rgb_nmf_4comp.fit_transform(hsi_rgb_denoised_2d)
H_rgb_nmf_4comp = rgb_nmf_4comp.components_
# 5 Components
rgb_nmf_5comp = nmf_5comp
W_rgb_nmf_5comp = rgb_nmf_5comp.fit_transform(hsi_rgb_denoised_2d)
H_rgb_nmf_5comp = rgb_nmf_5comp.components_
# 6 Components
rgb_nmf_6comp = nmf_6comp
W_rgb_nmf_6comp = rgb_nmf_6comp.fit_transform(hsi_rgb_denoised_2d)
H_rgb_nmf_6comp = rgb_nmf_6comp.components_

# Spectral components
rgb_nmf_spect_matrices = [W_rgb_nmf_2comp, W_rgb_nmf_3comp, W_rgb_nmf_4comp, W_rgb_nmf_5comp, 
                         W_rgb_nmf_6comp]
# Image components
rgb_nmf_img_matrices = [H_rgb_nmf_2comp, H_rgb_nmf_3comp, H_rgb_nmf_4comp, H_rgb_nmf_5comp, 
                       H_rgb_nmf_6comp]
In [306]:
nmf_plot("Blind NMF on Calibrator 2-6 Components", 
         rgb_nmf_spect_matrices, rgb_nmf_img_matrices, xpix, ypix)
No description has been provided for this image

Noiseless artificial data¶

In [262]:
# 2 Components
hsi1_nmf_2comp = nmf_2comp
W_hsi1_nmf_2comp = hsi1_nmf_2comp.fit_transform(hsi1_2d)
H_hsi1_nmf_2comp = hsi1_nmf_2comp.components_
# 3 Components
hsi1_nmf_3comp = nmf_3comp
W_hsi1_nmf_3comp = hsi1_nmf_3comp.fit_transform(hsi1_2d)
H_hsi1_nmf_3comp = hsi1_nmf_3comp.components_
# 4 Components
hsi1_nmf_4comp = nmf_4comp
W_hsi1_nmf_4comp = hsi1_nmf_4comp.fit_transform(hsi1_2d)
H_hsi1_nmf_4comp = hsi1_nmf_4comp.components_
# 5 Components
hsi1_nmf_5comp = nmf_5comp
W_hsi1_nmf_5comp = hsi1_nmf_5comp.fit_transform(hsi1_2d)
H_hsi1_nmf_5comp = hsi1_nmf_5comp.components_
# 6 Components
hsi1_nmf_6comp = nmf_6comp
W_hsi1_nmf_6comp = hsi1_nmf_6comp.fit_transform(hsi1_2d)
H_hsi1_nmf_6comp = hsi1_nmf_6comp.components_

# Spectral components
hsi1_nmf_spect_matrices = [W_hsi1_nmf_2comp, W_hsi1_nmf_3comp, W_hsi1_nmf_4comp, W_hsi1_nmf_5comp, 
                         W_hsi1_nmf_6comp]
# Image components
hsi1_nmf_img_matrices = [H_hsi1_nmf_2comp, H_hsi1_nmf_3comp, H_hsi1_nmf_4comp, H_hsi1_nmf_5comp, 
                       H_hsi1_nmf_6comp]
In [307]:
nmf_plot("Blind NMF on Noiseless Artificial Data 2-6 Components", 
         hsi1_nmf_spect_matrices, hsi1_nmf_img_matrices, xpix, ypix)
No description has been provided for this image

Noisy artificial data¶

In [264]:
# 2 Components
hsi_nmf_2comp = nmf_2comp
W_hsi_nmf_2comp = hsi_nmf_2comp.fit_transform(hsi_denoised_2d)
H_hsi_nmf_2comp = hsi_nmf_2comp.components_
# 3 Components
hsi_nmf_3comp = nmf_3comp
W_hsi_nmf_3comp = hsi_nmf_3comp.fit_transform(hsi_denoised_2d)
H_hsi_nmf_3comp = hsi_nmf_3comp.components_
# 4 Components
hsi_nmf_4comp = nmf_4comp
W_hsi_nmf_4comp = hsi_nmf_4comp.fit_transform(hsi_denoised_2d)
H_hsi_nmf_4comp = hsi_nmf_4comp.components_
# 5 Components
hsi_nmf_5comp = nmf_5comp
W_hsi_nmf_5comp = hsi_nmf_5comp.fit_transform(hsi_denoised_2d)
H_hsi_nmf_5comp = hsi_nmf_5comp.components_
# 6 Components
hsi_nmf_6comp = nmf_6comp
W_hsi_nmf_6comp = hsi_nmf_6comp.fit_transform(hsi_denoised_2d)
H_hsi_nmf_6comp = hsi_nmf_6comp.components_

# Spectral components
hsi_nmf_spect_matrices = [W_hsi_nmf_2comp, W_hsi_nmf_3comp, W_hsi_nmf_4comp, W_hsi_nmf_5comp, 
                         W_hsi_nmf_6comp]
# Image components
hsi_nmf_img_matrices = [H_hsi_nmf_2comp, H_hsi_nmf_3comp, H_hsi_nmf_4comp, H_hsi_nmf_5comp, 
                       H_hsi_nmf_6comp]
In [308]:
nmf_plot("Blind NMF on Noisy Artificial Data 2-6 Components", 
         hsi_nmf_spect_matrices, hsi_nmf_img_matrices, xpix, ypix)
No description has been provided for this image

Explained Variance¶

In [266]:
rgb_nmf_models = [rgb_nmf_2comp, rgb_nmf_3comp, rgb_nmf_4comp, rgb_nmf_5comp, rgb_nmf_6comp]
rgb_nmf_results = [error_analysis(rgb_nmf_models, hsi_rgb_denoised_2d)]

hsi1_nmf_models = [hsi1_nmf_2comp, hsi1_nmf_3comp, hsi1_nmf_4comp, hsi1_nmf_5comp, hsi1_nmf_6comp]
hsi1_nmf_results = [error_analysis(hsi1_nmf_models, hsi1_2d)]

hsi_nmf_models = [hsi_nmf_2comp, hsi_nmf_3comp, hsi_nmf_4comp, hsi_nmf_5comp, hsi_nmf_6comp]
hsi_nmf_results = [error_analysis(hsi_nmf_models, hsi_denoised_2d)]
In [309]:
explained_variance_plot("Explained Variance of Blind NMF on Artificial Data", rgb_nmf_results, 
                        hsi1_nmf_results, hsi_nmf_results)
No description has been provided for this image

Sparse NMF with Nimfa¶

In [268]:
n_comp = 3
iterations = 400
sparseness_levels = [1e-8, 1e-6, 1e-4, 1e-3, 0.01, 0.1, 1.0, 5.0]

rgb_snmf_matrices = []
hsi1_snmf_matrices = []
hsi_snmf_matrices = []

rgb_snmf_models = []
hsi1_snmf_models = []
hsi_snmf_models = []

for s in sparseness_levels: # all models enforce sparseness in the spectral components
    
    rgb_matrices, rgb_model = (snmf_model(hsi_rgb_denoised_2d, s, n_comp, iterations, "l"))
    rgb_snmf_matrices.append(rgb_matrices)
    rgb_snmf_models.append(rgb_model)
    
    print("Calibrator SNMF with sparseness level " + str(s) + " completed")
    
    hsi1_matrices, hsi1_model = (snmf_model(hsi1_2d, s, n_comp, iterations, "l"))
    hsi1_snmf_matrices.append(hsi1_matrices)
    hsi1_snmf_models.append(hsi1_model)
    
    print("Noiseless SNMF with sparseness level " + str(s) + " completed")
    
    hsi_matrices, hsi_model = (snmf_model(hsi_denoised_2d, s, n_comp, iterations, "l"))
    hsi_snmf_matrices.append(hsi_matrices)
    hsi_snmf_models.append(hsi_model)
    
    print("Noisy SNMF with sparseness level " + str(s) + " completed")
Calibrator SNMF with sparseness level 1e-08 completed
Noiseless SNMF with sparseness level 1e-08 completed
Noisy SNMF with sparseness level 1e-08 completed
Calibrator SNMF with sparseness level 1e-06 completed
Noiseless SNMF with sparseness level 1e-06 completed
Noisy SNMF with sparseness level 1e-06 completed
Calibrator SNMF with sparseness level 0.0001 completed
Noiseless SNMF with sparseness level 0.0001 completed
Noisy SNMF with sparseness level 0.0001 completed
Calibrator SNMF with sparseness level 0.001 completed
Noiseless SNMF with sparseness level 0.001 completed
Noisy SNMF with sparseness level 0.001 completed
Calibrator SNMF with sparseness level 0.01 completed
Noiseless SNMF with sparseness level 0.01 completed
Noisy SNMF with sparseness level 0.01 completed
Calibrator SNMF with sparseness level 0.1 completed
Noiseless SNMF with sparseness level 0.1 completed
Noisy SNMF with sparseness level 0.1 completed
Calibrator SNMF with sparseness level 1.0 completed
Noiseless SNMF with sparseness level 1.0 completed
Noisy SNMF with sparseness level 1.0 completed
Calibrator SNMF with sparseness level 5.0 completed
Noiseless SNMF with sparseness level 5.0 completed
Noisy SNMF with sparseness level 5.0 completed

Calibrator¶

In [310]:
snmf_plot("Sparse NMF with Calibrator", 
          rgb_snmf_matrices, sparseness_levels, n_comp, xpix, ypix)
No description has been provided for this image

Noiseless artificial data¶

In [311]:
snmf_plot("Sparse NMF with Noiseless Artificial Data", 
          hsi1_snmf_matrices, sparseness_levels, n_comp, xpix, ypix)
No description has been provided for this image

Noisy artificial data¶

In [312]:
snmf_plot("Sparse NMF with Noisy Artificial Data", 
          hsi_snmf_matrices, sparseness_levels, n_comp, xpix, ypix)
No description has been provided for this image

Sparse NMF Explained Variance¶

In [272]:
rgb_snmf_results = [error_analysis_nimfa(rgb_snmf_models)]
hsi1_snmf_results = [error_analysis_nimfa(hsi1_snmf_models)]
hsi_snmf_results = [error_analysis_nimfa(hsi_snmf_models)]
In [313]:
sparseness_evr_plot("Explained Variance of Sparse NMF on Artificial Data", rgb_snmf_results, 
                        hsi1_snmf_results, hsi_snmf_results, sparseness_levels)
No description has been provided for this image

Unsupervised VAE on Artificial Data¶

Calibrator¶

In [152]:
# Transpose and reshape
rgb_denoised_1d_transpose = hsi_rgb_denoised_2d.transpose(1,0).reshape(zpix * ypix * xpix)
rgb_denoised_2d_norm = normalize(rgb_denoised_1d_transpose).reshape(xpix * ypix, zpix)

# Convert to Torch tensor object
rgb_denoised_2d_norm_t = torch.from_numpy(np.array(rgb_denoised_2d_norm).astype('float64')).float()
rgb_n_samples = rgb_denoised_2d_norm_t.size()[0] # number of spectral points
rgb_l_signal = rgb_denoised_2d_norm_t.size()[1] # number of spectra

rgb_train_data = rgb_denoised_2d_norm_t.clone()
rgb_train_loader = pv.utils.init_dataloader(rgb_train_data.unsqueeze(1), batch_size=64)

rgb_in_dim = (rgb_l_signal,)
# Gradually increase the weight of the KL Divergence term in the loss function with each epoch
rgb_kl_scale = torch.linspace(start=.001, end=.01, steps=50)

# Train the model
rgb_vae = pv.models.iVAE(rgb_in_dim, latent_dim=2, invariances=None, sampler_d="gaussian")
rgb_vae_trainer = pv.trainers.SVItrainer(rgb_vae)

for e in range(50):
    sc = rgb_kl_scale[e] if e < len(rgb_kl_scale) else rgb_kl_scale[-1]
    rgb_vae_trainer.step(rgb_train_loader, scale_factor=sc)
    if e//10 == e/10:
        rgb_vae_trainer.print_statistics()
        
rgb_vae_z_mean, rgb_vae_z_sd = rgb_vae.encode(rgb_train_data)
rgb_vae_z_mean = normalize(rgb_vae_z_mean)
rgb_vae_z_sd = normalize(rgb_vae_z_sd)
Epoch: 1 Training loss: 74.4753
Epoch: 11 Training loss: 67.7587
Epoch: 21 Training loss: 67.7604
Epoch: 31 Training loss: 67.7679
Epoch: 41 Training loss: 67.7786
In [314]:
vae_plot("Vanilla VAE on Calibrator", rgb_vae_z_mean, xpix, ypix)
No description has been provided for this image

Noiseless artificial data¶

In [154]:
# Transpose and reshape
hsi1_denoised_1d_transpose = hsi1_2d.transpose(1,0).reshape(zpix * ypix * xpix)
hsi1_denoised_2d_norm = normalize(hsi1_denoised_1d_transpose).reshape(xpix * ypix, zpix)

# Convert to Torch tensor object
hsi1_denoised_2d_norm_t = torch.from_numpy(np.array(hsi1_denoised_2d_norm).astype('float64')).float()
hsi1_n_samples = hsi1_denoised_2d_norm_t.size()[0] # number of spectral points
hsi1_l_signal = hsi1_denoised_2d_norm_t.size()[1] # number of spectra

hsi1_train_data = hsi1_denoised_2d_norm_t.clone()
hsi1_train_loader = pv.utils.init_dataloader(hsi1_train_data.unsqueeze(1), batch_size=64)

hsi1_in_dim = (hsi1_l_signal,)
# Gradually increase the weight of the KL Divergence term in the loss function with each epoch
hsi1_kl_scale = torch.linspace(start=.001, end=.01, steps=50)

# Train the model
hsi1_vae = pv.models.iVAE(hsi1_in_dim, latent_dim=2, invariances=None, sampler_d="gaussian")
hsi1_vae_trainer = pv.trainers.SVItrainer(hsi1_vae)

for e in range(50):
    sc = hsi1_kl_scale[e] if e < len(hsi1_kl_scale) else hsi1_kl_scale[-1]
    hsi1_vae_trainer.step(hsi1_train_loader, scale_factor=sc)
    if e//10 == e/10:
        hsi1_vae_trainer.print_statistics()
        
hsi1_vae_z_mean, hsi1_vae_z_sd = hsi1_vae.encode(hsi1_train_data)
hsi1_vae_z_mean = normalize(hsi1_vae_z_mean)
hsi1_vae_z_sd = normalize(hsi1_vae_z_sd)
Epoch: 1 Training loss: 73.6469
Epoch: 11 Training loss: 67.7582
Epoch: 21 Training loss: 67.7607
Epoch: 31 Training loss: 67.7626
Epoch: 41 Training loss: 67.7671
In [315]:
vae_plot("Vanilla VAE on Noiseless Artificial Data", hsi1_vae_z_mean, xpix, ypix)
No description has been provided for this image
In [156]:
hsi1_vae.manifold2d(d=5)
No description has been provided for this image
Out[156]:
tensor([[2.0481e-03, 2.0453e-03, 2.1140e-03,  ..., 2.2354e-03, 2.1738e-03,
         2.1642e-03],
        [1.4051e-03, 1.4795e-03, 1.3848e-03,  ..., 1.4651e-03, 1.5669e-03,
         1.4010e-03],
        [1.2660e-04, 1.2518e-04, 1.3661e-04,  ..., 1.1144e-04, 1.1084e-04,
         1.0773e-04],
        ...,
        [1.0392e-04, 8.0720e-05, 9.9335e-05,  ..., 9.0773e-05, 9.5632e-05,
         8.1836e-05],
        [5.1150e-05, 4.4734e-05, 5.3905e-05,  ..., 5.1767e-05, 5.4062e-05,
         5.0325e-05],
        [4.9471e-05, 4.7185e-05, 5.2956e-05,  ..., 5.4603e-05, 5.4376e-05,
         5.2619e-05]])

Noisy artificial data¶

In [157]:
# Transpose and reshape
hsi_denoised_1d_transpose = hsi_denoised_2d.transpose(1,0).reshape(zpix * ypix * xpix)
hsi_denoised_2d_norm = normalize(hsi_denoised_1d_transpose).reshape(xpix * ypix, zpix)

# Convert to Torch tensor object
hsi_denoised_2d_norm_t = torch.from_numpy(np.array(hsi_denoised_2d_norm).astype('float64')).float()
hsi_n_samples = hsi_denoised_2d_norm_t.size()[0] # number of spectral points
hsi_l_signal = hsi_denoised_2d_norm_t.size()[1] # number of spectra

hsi_train_data = hsi_denoised_2d_norm_t.clone()
hsi_train_loader = pv.utils.init_dataloader(hsi_train_data.unsqueeze(1), batch_size=64)

hsi_in_dim = (hsi_l_signal,)
# Gradually increase the weight of the KL Divergence term in the loss function with each epoch
hsi_kl_scale = torch.linspace(start=.001, end=.01, steps=50)

# Train the model
hsi_vae = pv.models.iVAE(hsi_in_dim, latent_dim=2, invariances=None, sampler_d="gaussian")
hsi_vae_trainer = pv.trainers.SVItrainer(hsi_vae)

for e in range(50):
    sc = hsi_kl_scale[e] if e < len(hsi_kl_scale) else hsi_kl_scale[-1]
    hsi_vae_trainer.step(hsi_train_loader, scale_factor=sc)
    if e//10 == e/10:
        hsi_vae_trainer.print_statistics()
        
hsi_vae_z_mean, hsi_vae_z_sd = hsi_vae.encode(hsi_train_data)
hsi_vae_z_mean = normalize(hsi_vae_z_mean)
hsi_vae_z_sd = normalize(hsi_vae_z_sd)
Epoch: 1 Training loss: 73.6643
Epoch: 11 Training loss: 67.7575
Epoch: 21 Training loss: 67.7588
Epoch: 31 Training loss: 67.7622
Epoch: 41 Training loss: 67.7672
In [316]:
vae_plot("Vanilla VAE on Noisy Artificial Data", hsi_vae_z_mean, xpix, ypix)
No description has been provided for this image

Determining latent dimensions¶

In [162]:
# X-axis
# Convert to np array and reshape to 2d
hsi_vae_z_mean_x = hsi_vae_z_mean[:, 0].detach().cpu().numpy()
hsi_vae_z_mean_x = hsi_vae_z_mean_x.reshape(ypix, xpix)
print("X Min: ", (hsi_vae_z_mean_x==np.min(hsi_vae_z_mean_x)).nonzero())
print("X Max: ", (hsi_vae_z_mean_x==np.max(hsi_vae_z_mean_x)).nonzero())

hsi_vae_z_mean_x_sort = np.dstack(
    np.unravel_index(np.argsort(hsi_vae_z_mean_x.ravel()), (ypix, xpix)))
hsi_vae_z_mean_x_sort = np.squeeze(hsi_vae_z_mean_x_sort)

spectra_plot("Vanilla VAE on Noisy Artificial Data X-axis Spectra", hsi, hsi_vae_z_mean_x_sort, 
             xpix, ypix, True)
X Min:  (array([86]), array([112]))
X Max:  (array([85, 85]), array([81, 82]))
No description has been provided for this image
In [161]:
# Y-axis
hsi_vae_z_mean_y = hsi_vae_z_mean[:, 1].detach().cpu().numpy()
hsi_vae_z_mean_y = hsi_vae_z_mean_y.reshape(ypix, xpix)
print("Y Min: ", (hsi_vae_z_mean_y==np.min(hsi_vae_z_mean_x)).nonzero())
print("Y Max: ", (hsi_vae_z_mean_y==np.max(hsi_vae_z_mean_x)).nonzero())

hsi_vae_z_mean_y_sort = np.dstack(
    np.unravel_index(np.argsort(hsi_vae_z_mean_y.ravel()), (ypix, xpix)))
hsi_vae_z_mean_y_sort = np.squeeze(hsi_vae_z_mean_y_sort)

spectra_plot("Vanilla VAE on Noisy Artificial Data Y-axis Spectra", hsi, hsi_vae_z_mean_y_sort, 
             xpix, ypix, True)
Y Min:  (array([], dtype=int64), array([], dtype=int64))
Y Max:  (array([], dtype=int64), array([], dtype=int64))
No description has been provided for this image
In [160]:
hsi_vae.manifold2d(d=5)
No description has been provided for this image
Out[160]:
tensor([[7.1727e-05, 4.1761e-05, 1.2139e-03,  ..., 2.9731e-03, 1.2540e-04,
         3.7594e-05],
        [6.7384e-05, 4.2683e-05, 8.1973e-04,  ..., 1.9013e-03, 9.5944e-05,
         3.8472e-05],
        [6.2597e-05, 6.1596e-05, 1.5167e-04,  ..., 1.3518e-04, 4.9536e-05,
         5.5334e-05],
        ...,
        [4.1632e-05, 4.9906e-05, 9.3308e-05,  ..., 8.5248e-05, 3.2839e-05,
         5.0167e-05],
        [3.4235e-05, 3.5931e-05, 4.9513e-05,  ..., 5.3711e-05, 2.1183e-05,
         3.4411e-05],
        [3.6095e-05, 3.1962e-05, 4.2673e-05,  ..., 5.2135e-05, 1.7393e-05,
         3.5447e-05]])

Semi-Supervised VAE on Artificial Data¶

Preparing data¶

In [228]:
pointfinder_plot(hsi, 55, ypix)
No description has been provided for this image
In [325]:
# arrays of points labeled 0 to 3, totalling 4 classes
# format: [y, x]
pts0 = [[0, 0], [0, 50], [0, 100], [94, 70], [115, 30], [127, 30]]
pts1 = [[6, 50], [8, 50], [51, 70], [69, 70], [50, 30], [58, 30]]
pts2 = [[17, 50], [19, 50], [104, 70], [113, 70], [62, 30], [70, 30]]
pts3 = [[56, 60], [47, 70], [49, 60], [53, 60], [47, 55], [53, 55]]

labels = [pts0, pts1, pts2, pts3]

hsi_denoised_3d_norm_t = hsi_denoised_2d_norm_t.reshape(ypix, xpix, zpix)
In [326]:
spectsig_plot("Four Classes of Spectral Features", hsi_denoised_3d_norm_t, labels)
No description has been provided for this image
In [233]:
# Create normalized, denoised, hyperspectral image
hsi_denoised_3d_norm = normalize(hsi_denoised_1d_transpose).reshape(ypix, xpix, zpix)

# Labeled data
n_samples = 6
n_sup = 4 # number of supervised samples
n_val = 2 # number of validation samples
hsi_X_sup = torch.stack((hsi_denoised_3d_norm_t[pts0[0][0],pts0[0][1],:], 
                    hsi_denoised_3d_norm_t[pts0[1][0],pts0[1][1],:],
                    hsi_denoised_3d_norm_t[pts0[2][0],pts0[2][1],:],
                    hsi_denoised_3d_norm_t[pts0[3][0],pts0[3][1],:],
                    hsi_denoised_3d_norm_t[pts1[0][0],pts1[0][1],:], 
                    hsi_denoised_3d_norm_t[pts1[1][0],pts1[1][1],:],
                    hsi_denoised_3d_norm_t[pts1[2][0],pts1[2][1],:],
                    hsi_denoised_3d_norm_t[pts1[3][0],pts1[3][1],:],
                    hsi_denoised_3d_norm_t[pts2[0][0],pts2[0][1],:], 
                    hsi_denoised_3d_norm_t[pts2[1][0],pts2[1][1],:],
                    hsi_denoised_3d_norm_t[pts2[2][0],pts2[2][1],:],
                    hsi_denoised_3d_norm_t[pts2[3][0],pts2[3][1],:],
                    hsi_denoised_3d_norm_t[pts3[0][0],pts3[0][1],:], 
                    hsi_denoised_3d_norm_t[pts3[1][0],pts3[1][1],:],
                    hsi_denoised_3d_norm_t[pts3[2][0],pts3[2][1],:],
                    hsi_denoised_3d_norm_t[pts3[3][0],pts3[3][1],:])).float()

hsi_sup_labels = torch.cat([torch.zeros(n_sup), torch.ones(n_sup),
                           2*torch.ones(n_sup), 3*torch.ones(n_sup)])
hsi_y_sup = pv.utils.to_onehot(hsi_sup_labels.long(), n=4) # create onehot array for supervised labels

hsi_X_val = torch.stack((hsi_denoised_3d_norm_t[pts0[4][0],pts0[4][1],:], 
                         hsi_denoised_3d_norm_t[pts0[5][0],pts0[5][1],:], 
                         hsi_denoised_3d_norm_t[pts1[4][0],pts1[4][1],:], 
                         hsi_denoised_3d_norm_t[pts1[5][0],pts1[5][1],:], 
                         hsi_denoised_3d_norm_t[pts2[4][0],pts2[4][1],:], 
                         hsi_denoised_3d_norm_t[pts2[5][0],pts2[5][1],:], 
                         hsi_denoised_3d_norm_t[pts3[4][0],pts3[4][1],:], 
                         hsi_denoised_3d_norm_t[pts3[5][0],pts3[5][1],:])).float()

#hsi_X_val = torch.cat(hsi_X_val)
hsi_val_labels = torch.cat([torch.zeros(n_val), torch.ones(n_val),
                           2*torch.ones(n_val), 3*torch.ones(n_val)])
hsi_y_val = pv.utils.to_onehot(hsi_val_labels.long(), n=4) # create onehot array for validation labels

# Remove all labeled samples from unlabeled data by setting them to zero
hsi_X_unsup = hsi_denoised_3d_norm_t
for i in range(n_samples):
    for j in range(4):
        hsi_X_unsup[labels[j][i][0], labels[j][i][1], :] *= 0
        
hsi_X_unsup = hsi_X_unsup.reshape(xpix*ypix, zpix)

# Randomly shuffle points in image
#hsi_X_unsup = hsi_X_unsup[torch.randperm(hsi_X_unsup.size()[0])]

(hsi_loader_unsup, hsi_loader_sup, hsi_loader_val) = pv.utils.init_ssvae_dataloaders(
    hsi_X_unsup, (hsi_X_sup, hsi_y_sup), (hsi_X_val, hsi_y_val), batch_size=64)
r = (len(hsi_X_sup) + len(hsi_X_val)) / (len(hsi_X_unsup))
print("Ratio of labeled data to unlabeled data: {}".format(r))
Ratio of labeled data to unlabeled data: 0.0010666666666666667

Training SSVAE Model¶

In [234]:
# Train the SSVAE model

hsi_ss_data_dim = (hsi_X_unsup.shape[1],)
latent_dim = 2
num_classes = 4 

# Initialize model
#hsi_ssvae = pv.models.ss_reg_iVAE(
#    hsi_ss_data_dim, latent_dim, num_classes, invariances=None)
hsi_ssvae = pv.models.ssiVAE(
    hsi_ss_data_dim, latent_dim, num_classes, invariances=None)

# Initialize trainer
hsi_ss_trainer = pv.trainers.auxSVItrainer(hsi_ssvae, task="classification")

# Gradually increase KL weight from 1 to 2 in the first 30 epochs
hsi_ss_kl_scale = torch.linspace(1, 2, 30)
# Train model for n epochs
for e in range(50):
    sc = hsi_ss_kl_scale[e] if e < len(hsi_ss_kl_scale) else hsi_ss_kl_scale[-1]
    hsi_ss_trainer.step(hsi_loader_unsup, hsi_loader_sup, hsi_loader_val,
                 aux_loss_multiplier=50, scale_factor=sc, sampler_d="gaussian")
    hsi_ss_trainer.print_statistics()
    # Plot learned latent manifolds every 10 epoch
    #if (e+1) % 10 == 0:
        #plot_manifolds(f1_ssvae)
Epoch: 1 Training loss: 31.4974, Test accuracy: 0.2500
Epoch: 2 Training loss: 18.7537, Test accuracy: 0.2500
Epoch: 3 Training loss: 18.4341, Test accuracy: 0.2500
Epoch: 4 Training loss: 18.3721, Test accuracy: 0.2500
Epoch: 5 Training loss: 18.3608, Test accuracy: 0.2500
Epoch: 6 Training loss: 18.3342, Test accuracy: 0.2500
Epoch: 7 Training loss: 18.0592, Test accuracy: 0.2500
Epoch: 8 Training loss: 17.9969, Test accuracy: 0.2500
Epoch: 9 Training loss: 17.9891, Test accuracy: 0.2500
Epoch: 10 Training loss: 17.9900, Test accuracy: 0.2500
Epoch: 11 Training loss: 17.9861, Test accuracy: 0.2500
Epoch: 12 Training loss: 17.9681, Test accuracy: 0.2500
Epoch: 13 Training loss: 17.8745, Test accuracy: 0.2500
Epoch: 14 Training loss: 17.7099, Test accuracy: 0.2500
Epoch: 15 Training loss: 17.6864, Test accuracy: 0.2500
Epoch: 16 Training loss: 17.6844, Test accuracy: 0.2500
Epoch: 17 Training loss: 17.6963, Test accuracy: 0.2500
Epoch: 18 Training loss: 17.7018, Test accuracy: 0.2500
Epoch: 19 Training loss: 17.7058, Test accuracy: 0.2500
Epoch: 20 Training loss: 17.7134, Test accuracy: 0.2500
Epoch: 21 Training loss: 17.7141, Test accuracy: 0.2500
Epoch: 22 Training loss: 17.7331, Test accuracy: 0.2500
Epoch: 23 Training loss: 17.7405, Test accuracy: 0.2500
Epoch: 24 Training loss: 17.7440, Test accuracy: 0.2500
Epoch: 25 Training loss: 17.7537, Test accuracy: 0.2500
Epoch: 26 Training loss: 17.7608, Test accuracy: 0.2500
Epoch: 27 Training loss: 17.7585, Test accuracy: 0.2500
Epoch: 28 Training loss: 17.7826, Test accuracy: 0.2500
Epoch: 29 Training loss: 17.7857, Test accuracy: 0.2500
Epoch: 30 Training loss: 17.7846, Test accuracy: 0.2500
Epoch: 31 Training loss: 17.7967, Test accuracy: 0.2500
Epoch: 32 Training loss: 17.7903, Test accuracy: 0.2500
Epoch: 33 Training loss: 17.7868, Test accuracy: 0.2500
Epoch: 34 Training loss: 17.7914, Test accuracy: 0.2500
Epoch: 35 Training loss: 17.7877, Test accuracy: 0.2500
Epoch: 36 Training loss: 17.7859, Test accuracy: 0.2500
Epoch: 37 Training loss: 17.7848, Test accuracy: 0.2500
Epoch: 38 Training loss: 17.7857, Test accuracy: 0.2500
Epoch: 39 Training loss: 17.7896, Test accuracy: 0.2500
Epoch: 40 Training loss: 17.7852, Test accuracy: 0.2500
Epoch: 41 Training loss: 17.7861, Test accuracy: 0.2500
Epoch: 42 Training loss: 17.7836, Test accuracy: 0.2500
Epoch: 43 Training loss: 17.7865, Test accuracy: 0.2500
Epoch: 44 Training loss: 17.7867, Test accuracy: 0.2500
Epoch: 45 Training loss: 17.7840, Test accuracy: 0.2500
Epoch: 46 Training loss: 17.7890, Test accuracy: 0.2500
Epoch: 47 Training loss: 17.7753, Test accuracy: 0.2500
Epoch: 48 Training loss: 17.7842, Test accuracy: 0.2500
Epoch: 49 Training loss: 17.7806, Test accuracy: 0.2500
Epoch: 50 Training loss: 17.7772, Test accuracy: 0.2500
In [235]:
hsi_ssvae_z_mean, hsi_ssvae_z_sd, hsi_ssvae_z_labels = hsi_ssvae.encode(hsi_train_data)
hsi_ssvae_z_mean = normalize(hsi_ssvae_z_mean)
hsi_ssvae_z_sd = normalize(hsi_ssvae_z_sd)
hsi_ssvae_z_labels_2d = hsi_ssvae_z_labels.reshape(ypix, xpix)

hsi_ssvae_0_labels = (hsi_ssvae_z_labels_2d == 0).nonzero(as_tuple=False)
hsi_ssvae_1_labels = (hsi_ssvae_z_labels_2d == 1).nonzero(as_tuple=False)
hsi_ssvae_2_labels = (hsi_ssvae_z_labels_2d == 2).nonzero(as_tuple=False)
hsi_ssvae_3_labels = (hsi_ssvae_z_labels_2d == 3).nonzero(as_tuple=False)

Identifying Classes¶

In [236]:
ssvae_label_plot(hsi, hsi_ssvae_0_labels) # 180 spectra from specified class
No description has been provided for this image
In [242]:
ssvae_label_plot(hsi, hsi_ssvae_1_labels)
No description has been provided for this image
In [243]:
ssvae_label_plot(hsi, hsi_ssvae_2_labels)
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
/var/folders/2t/38t43z9j1jbd04b_hsjlptfw0000gn/T/ipykernel_92950/1746077235.py in <module>
----> 1 ssvae_label_plot(hsi, hsi_ssvae_2_labels)

/var/folders/2t/38t43z9j1jbd04b_hsjlptfw0000gn/T/ipykernel_92950/2819777500.py in ssvae_label_plot(img, pts, starting_index, save)
    190 
    191         fig_plt = fig.add_subplot(gs[row_count,0])
--> 192         fig_plt.plot(img[:, pts[i,0], pts[i,1]], 
    193                      c=color[row_count], lw=3)
    194         plt.xticks([])

IndexError: index 2 is out of bounds for dimension 0 with size 2
No description has been provided for this image
In [244]:
ssvae_label_plot(hsi, hsi_ssvae_3_labels)
No description has been provided for this image

Plotting Results¶

In [319]:
ssvae_plot("SSVAE on Artificial Data", hsi_ssvae_z_mean, hsi_ssvae_z_labels, xpix, ypix)
No description has been provided for this image
In [247]:
# X-axis
hsi_ssvae_z_mean_x = hsi_ssvae_z_mean[:,0].detach().cpu().numpy()
hsi_ssvae_z_mean_x = hsi_ssvae_z_mean_x.reshape(ypix, xpix)
print("X Min: ", (hsi_ssvae_z_mean_x==np.min(hsi_ssvae_z_mean_x)).nonzero())
print("X Max: ", (hsi_ssvae_z_mean_x==np.max(hsi_ssvae_z_mean_x)).nonzero())
hsi_ssvae_z_mean_x_sort = np.dstack(
    np.unravel_index(np.argsort(hsi_ssvae_z_mean_x.ravel()), (ypix, xpix)))
hsi_ssvae_z_mean_x_sort = np.squeeze(hsi_ssvae_z_mean_x_sort)

spectra_plot("SSVAE on Artificial Data X-Axis Spectra", hsi, hsi_ssvae_z_mean_x_sort, xpix, ypix)
X Min:  (array([25, 25]), array([53, 54]))
X Max:  (array([  0,   0,   0, ..., 149, 149, 149]), array([  0,   1,   2, ..., 147, 148, 149]))
No description has been provided for this image
In [248]:
# Y-axis
hsi_ssvae_z_mean_y = hsi_ssvae_z_mean[:,1].detach().cpu().numpy()
hsi_ssvae_z_mean_y = hsi_ssvae_z_mean_y.reshape(ypix, xpix)
print("Y Min: ", (hsi_ssvae_z_mean_y==np.min(hsi_ssvae_z_mean_y)).nonzero())
print("Y Max: ", (hsi_ssvae_z_mean_y==np.max(hsi_ssvae_z_mean_y)).nonzero())
hsi_ssvae_z_mean_y_sort = np.dstack(
    np.unravel_index(np.argsort(hsi_ssvae_z_mean_y.ravel()), (ypix, xpix)))
hsi_ssvae_z_mean_y_sort = np.squeeze(hsi_ssvae_z_mean_y_sort)
spectra_plot("SSVAE on Artificial Data Y-Axis Spectra", hsi, hsi_ssvae_z_mean_y_sort, xpix, ypix)
Y Min:  (array([35, 35, 35, 35, 35, 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37,
       38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 40, 40, 40, 40, 40, 41,
       41, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 44, 44, 56, 56, 58, 58,
       59, 59, 61, 61, 61, 64, 64, 64, 69, 69, 73, 74, 74, 74, 77, 77, 79,
       79, 80, 80, 80, 80, 80, 80, 80, 80, 80, 81, 81, 81]), array([75, 76, 77, 78, 79, 71, 72, 73, 74, 85, 86, 87, 67, 68, 69, 70, 77,
       62, 63, 64, 65, 66, 85, 87, 58, 59, 60, 71, 55, 56, 57, 93, 94, 49,
       50, 51, 52, 95, 96, 58, 85, 98, 99, 50, 51, 84, 85, 48, 49, 54, 55,
       55, 56, 59, 60, 61, 64, 65, 66, 69, 70, 77, 73, 74, 75, 75, 76, 92,
       93, 80, 81, 82, 83, 86, 87, 88, 89, 90, 90, 91, 92]))
Y Max:  (array([ 40,  40, 111, 111]), array([14, 15, 73, 74]))
No description has been provided for this image
In [320]:
custom_spect_labels = [hsi_ssvae_0_labels[0], hsi_ssvae_1_labels[0], 
                            hsi_ssvae_2_labels[1], hsi_ssvae_3_labels[0]]

# Plot one spectrum from each class in the latent representation of the data
custom_spect_label_plot("Classes from SSVAE on Artificial Data", hsi, custom_spect_labels)
No description has been provided for this image